In [1]:
import sys
import os
sys.path.append('../src/')
sys.path.append('/home/ipl/installs/caffe-rc/python/')
import matplotlib
%matplotlib inline
from matplotlib import pylab as plt
import numpy as np
import scipy.misc
import scipy.stats
import scipy.io
import caffe
import cv2
import re

from sklearn import svm
from sklearn import metrics
import utils
from datetime import datetime as dt

from dataset import CUB_200_2011
from storage import datastore
from deep_extractor import CNN_Features_CAFFE_REFERENCE
import settings
from parts import *
from cub_utils import *
import skimage
import math
import copy

import sklearn.ensemble

In [2]:
KTH_ROOT = '/home/ipl/datasets/KTH-Football/FOOTBALL5907/'
KTH_ANNOTATION_FILE = '/home/ipl/datasets/KTH-Football/FOOTBALL5907/labels.mat'
KTH_ANNOTATION_KEY = 'ptsAll'
kth_labels = scipy.io.loadmat(KTH_ANNOTATION_FILE)
kth_labels = kth_labels[KTH_ANNOTATION_KEY]
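
The annotation matrix is assumed to be indexed (joint, x/y coordinate, frame), which is how the next cell slices it; with the 14 body joints listed at the end of this notebook and frames 1..5907 that would make it (14, 2, 5907). A quick check keeps the indexing honest:

print kth_labels.shape              # expected: (14, 2, 5907)
print kth_labels[:, :, 0]           # joint coordinates for frame 1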

In [3]:
def annotation_to_parts(num, kth_labels=kth_labels):
    annotations = kth_labels[:, :, num-1]
    gt_parts = Parts()
    for i, a in enumerate(annotations):
        gt_parts.append(Part(num, str(i), i, a[0], a[1], True))
    return gt_parts

def get_part_train_points(part, img_size, N=10, var=3):
    """Sample N points around a ground-truth part with Gaussian jitter (std = var pixels)."""
    result = Parts()

    # the local names are swapped relative to Part: xs jitter the row coordinate
    # (part.y, bounded by img_size[0]) and ys the column coordinate (part.x)
    xs = np.random.normal(loc=part.y, scale=var, size=N)
    ys = np.random.normal(loc=part.x, scale=var, size=N)

    for x, y in zip(xs, ys):
        # keep only samples that land inside the image
        if 0 <= x <= img_size[0] and 0 <= y <= img_size[1]:
            result.append(Part(-1, part.part_name, part.part_id, int(round(y)), int(round(x)), 1))

    return result

def part_distance(p1, p2):
    return math.sqrt((p1.x-p2.x)**2 + (p1.y-p2.y)**2)

def get_bg_points(gt_parts, img_size, N=81, no_side=True, thresh=30):
    """Lay an approximately sqrt(N) x sqrt(N) grid of background points over the image,
    then drop every grid point within thresh pixels of a ground-truth part."""
    # img_size comes from img.shape, so img_size[0] is the height and img_size[1] the width
    rect_w = img_size[0]
    rect_h = img_size[1]
    if not no_side:
        x_step = rect_h / (np.sqrt(N) - 1)
        y_step = rect_w / (np.sqrt(N) - 1)
    else:
        # no_side: keep the grid strictly inside the image, away from the borders
        x_step = rect_h / (np.sqrt(N) + 1)
        y_step = rect_w / (np.sqrt(N) + 1)

    x_count = int(rect_h / x_step)
    y_count = int(rect_w / y_step)

    points = Parts()
    if not no_side:
        x_from, y_from = 0, 0
        x_to, y_to = x_count + 1, y_count + 1
    else:
        x_from, y_from = 1, 1
        x_to, y_to = x_count, y_count
    for i in range(x_from, x_to):
        for j in range(y_from, y_to):
            x = int(i * x_step)
            y = int(j * y_step)
            points.append(Part(-1, 'bg', -1, x, y, 1))

    # keep only grid points that are at least thresh pixels away from every ground-truth part
    fpoints = Parts()
    for p in points:
        for gt_p in gt_parts:
            if part_distance(gt_p, p) < thresh:
                break
        else:
            fpoints.append(p)

    return fpoints

def points_for_image(num, img=None):
    if img is None:
        file_address = os.path.join(KTH_ROOT, "%s.jpg" % str(num).zfill(5))
        img = caffe.io.load_image(file_address)
    
    gt_parts = annotation_to_parts(num)

    # d[0] holds the background points, d[i + 1] the jittered samples for part i
    d = [0] * (len(gt_parts) + 1)
    d[0] = get_bg_points(gt_parts, img.shape)
    for i in range(len(gt_parts)):
        d[i+1] = get_part_train_points(gt_parts[i], img.shape)
    
    return d

def features_for_image(num, dh):
    file_address = os.path.join(KTH_ROOT, "%s.jpg" % str(num).zfill(5))
    img = caffe.io.load_image(file_address)

    dh.init_with_image(img)
    d = points_for_image(num, img)

    # label 0 is background, labels 1..N_joints the individual joints
    y = []
    Xs = []
    for i in range(len(d)):
        y.extend([i] * len(d[i]))
        # rescale point coordinates from image size to the network input size
        d[i].norm_for_size(img.shape[1], img.shape[0], dh.input_dim)
        Xs.append(dh.features(d[i]))

    X = np.vstack(Xs)
    y = np.array(y)

    return X, y

def show_img(num):
    file_address = os.path.join(KTH_ROOT, "%s.jpg" % str(num).zfill(5))
    img = caffe.io.load_image(file_address)
    
    
    fig = plt.figure(figsize=(7, 7))
    ax = fig.add_subplot(111)
    
    gt_parts = annotation_to_parts(num)

    # background grid points in black
    d_bg = get_bg_points(gt_parts, img.shape)
    d_bg.draw_part(ax, 'black')

    # jittered samples for each part, colour-coded by part index
    d = [0] * len(gt_parts)
    for i in range(len(gt_parts)):
        d[i] = get_part_train_points(gt_parts[i], img.shape)
        d[i].draw_part(ax, plt.cm.jet(1. * i / (len(gt_parts) - 1)))
    
    ax.imshow(img)
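
Under these defaults every frame contributes at most 10 jittered samples per joint plus up to 81 background grid points, minus any grid point that falls within 30 px of a joint. Since points_for_image only loads the frame and samples coordinates (it never touches the network), the per-frame balance is cheap to inspect:

d = points_for_image(1035)
print 'background points:', len(d[0])
print 'points per joint :', [len(p) for p in d[1:]]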

In [4]:
show_img(1035)

[figure: frame 1035 with the sampled background grid (black) and colour-coded per-part training points overlaid]

In [5]:
dh = DeepHelper()
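
DeepHelper comes in through the star import from cub_utils; the only members used below are init_with_image(img), which presumably runs the frame through the Caffe reference network, features(parts), which returns one feature row per point, and input_dim, the network input size used when normalising point coordinates.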

In [6]:
def extract_all_features(NS=1, NT=100):
    # extract features for frames NS..NT (both endpoints inclusive)
    Xs, ys = [], []
    for i in range(NS, NT + 1):
        X, y = features_for_image(i, dh)
        Xs.append(X)
        ys.append(y)

    y = np.hstack(ys)
    X = np.vstack(Xs)

    return X, y

In [7]:
Xtrain, ytrain = extract_all_features(NT=3900)
# note: both ranges are inclusive, so frame 3900 ends up in the training and the test split
Xtest, ytest = extract_all_features(NS=3900, NT=5907)
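
Class 0 (background) dominates the training labels, so it is worth a quick look at the sizes and per-class counts before fitting anything; np.bincount gives the counts directly:

print 'train features:', Xtrain.shape
print 'class counts  :', np.bincount(ytrain)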

In [8]:
model = sklearn.ensemble.RandomForestClassifier(n_estimators=10, max_depth=20, n_jobs=3)

In [9]:
model.fit(Xtrain, ytrain)


Out[9]:
RandomForestClassifier(bootstrap=True, compute_importances=None,
            criterion='gini', max_depth=20, max_features='auto',
            max_leaf_nodes=None, min_density=None, min_samples_leaf=1,
            min_samples_split=2, n_estimators=10, n_jobs=3,
            oob_score=False, random_state=None, verbose=0)
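
Feature extraction over ~3,900 frames plus the fit is the slow part of this notebook, so caching the fitted forest lets the visualisation cells below be rerun on their own. A minimal sketch using the joblib bundled with this scikit-learn version (the file name is arbitrary):

from sklearn.externals import joblib
joblib.dump(model, 'kth_part_rf.pkl')      # hypothetical cache file
# model = joblib.load('kth_part_rf.pkl')   # reload in a later session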

In [10]:
preds = model.predict(Xtest)

In [11]:
print metrics.classification_report(ytest, preds)


             precision    recall  f1-score   support

          0       0.98      0.99      0.99     99447
          1       0.70      0.68      0.69     20080
          2       0.79      0.81      0.80     20080
          3       0.79      0.89      0.83     20080
          4       0.78      0.88      0.83     20080
          5       0.79      0.82      0.81     20080
          6       0.69      0.68      0.68     20080
          7       0.74      0.56      0.64     20080
          8       0.78      0.80      0.79     20080
          9       0.83      0.87      0.85     20080
         10       0.85      0.84      0.85     20080
         11       0.76      0.82      0.79     20080
         12       0.75      0.57      0.65     20080
         13       0.82      0.89      0.85     20080
         14       0.97      0.95      0.96     20080

avg / total       0.84      0.84      0.84    380567
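
The per-class scores show the hands and feet (classes 1, 6, 7, 12) are the hardest joints. A confusion matrix makes explicit which joints get mixed up with which (e.g. left vs. right limbs); a quick sketch using only what is already imported:

cm = metrics.confusion_matrix(ytest, preds)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111)
cax = ax.matshow(np.log1p(cm))   # log scale so the background class does not swamp the rest
fig.colorbar(cax)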


In [12]:
def gen_dense_points(xdim=227, ydim=227):
    # one point per pixel of the network input grid (227 x 227 for the Caffe reference net)
    dense_points = Parts()
    for i in range(xdim):
        for j in range(ydim):
            dense_points.append(Part(-1, '?', -1, i, j, 1))
    return dense_points

In [13]:
dpts = gen_dense_points()
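
dpts holds one point per pixel of the 227 x 227 network input, and the reshape in the next cell relies on that length and ordering; a cheap assertion documents the assumption:

assert len(dpts) == 227 * 227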

In [31]:
def pred_for_image(num, model):
    file_address = os.path.join(KTH_ROOT, "%s.jpg" % str(num).zfill(5))
    img = caffe.io.load_image(file_address)
    
    fig = plt.figure(figsize=(14, 7))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    
    dh.init_with_image(img)
    X = dh.features(dpts)
    preds = model.predict(X)
    # back to a 227 x 227 label map; the transpose puts it in image (row, col) order
    pred_img = preds.reshape((227, 227)).T
    
    ax1.imshow(img)
    cax = ax2.matshow(pred_img)
    
    fig.colorbar(cax)

In [40]:
def pred_for_image_prob(num, part, model):
    file_address = os.path.join(KTH_ROOT, "%s.jpg" % str(num).zfill(5))
    img = caffe.io.load_image(file_address)
    
    fig = plt.figure(figsize=(14, 7))
    
    dh.init_with_image(img)
    X = dh.features(dpts)
    preds = model.predict_proba(X)
    pred_img = preds[:, part].reshape((227, 227)).T

    # resize the probability map back to the original frame size (cv2.resize takes (width, height))
    preds_prob_resize = cv2.resize(pred_img, (img.shape[1], img.shape[0]))
    img_gray = skimage.color.rgb2gray(img)

    ax = fig.add_subplot(121)
    ax.imshow(img)
    ax.axis('off')

    ax = fig.add_subplot(122)
    cax = ax.matshow(preds_prob_resize, cmap=plt.cm.Reds, alpha=1)
    ax.imshow(img_gray, alpha=0.3, cmap=plt.cm.gray)
    
    fig.colorbar(cax)
    ax.axis('off')

In [45]:
for i in range(15):
    pred_for_image_prob(985, i, model)


[figures: per-joint probability maps for frame 985, one panel per class]

Class index to part name:
0 BG, 1 Right Foot, 2 Right Knee, 3 Right Hip, 4 Left Hip, 5 Left Knee, 6 Left Foot,
7 Right Hand, 8 Right Elbow, 9 Right Shoulder, 10 Left Shoulder, 11 Left Elbow,
12 Left Hand, 13 Bottom Head, 14 Top Head
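
The probability maps are a natural starting point for actual keypoint predictions. As a rough sketch (not part of the pipeline above), the argmax of the resized map can be taken as the predicted location of each joint; keypoint_from_prob is a hypothetical helper built only from calls already used in this notebook:

def keypoint_from_prob(num, part, model):
    # hypothetical helper: argmax of the resized probability map for one joint
    file_address = os.path.join(KTH_ROOT, "%s.jpg" % str(num).zfill(5))
    img = caffe.io.load_image(file_address)
    dh.init_with_image(img)
    preds = model.predict_proba(dh.features(dpts))
    pred_img = preds[:, part].reshape((227, 227)).T
    prob = cv2.resize(pred_img, (img.shape[1], img.shape[0]))    # cv2.resize takes (width, height)
    row, col = np.unravel_index(np.argmax(prob), prob.shape)
    return col, row                                              # (x, y) in original image coordinates

# e.g. keypoint_from_prob(985, 14, model) for the top of the head in frame 985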

In [ ]: